Web crawling examples in Python using urllib, BeautifulSoup, re, and requests.
python3.6爬取糗事百科热门帖子例子/web crawling qiushibaike.com using python3.6
|
|
python2.7爬取百度贴吧帖子/web crawling tieba.baidu.com using python2.7
|
|
web crawling mzitu.com using python3.6
|
|
python3.6爬取豆瓣上映电影评分/web crawling douban.com using python3.6
|
|
|
|
Taobao search-results web crawler using Python 3.6
# encoding=utf8
import requests
import re
# Fetch page text
def getHTMLText(url):
    """Download *url* and return its decoded text, or "" on any request failure.

    The server-declared encoding is replaced with the content-sniffed
    ``apparent_encoding`` so non-UTF-8 pages decode correctly.
    """
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()  # turn 4xx/5xx responses into exceptions
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Only network/HTTP errors are swallowed (the original bare `except:`
        # also hid KeyboardInterrupt and programming errors).
        return ""
def paserPage(list, html):
    """Parse Taobao search-result *html*, appending [price, title] pairs to *list*.

    Prices and titles are pulled from the embedded JSON with regexes.  The
    surrounding quotes are stripped directly instead of calling ``eval()``,
    which must never run on untrusted page content.  (Function/parameter
    names are kept, misspelling and builtin shadowing included, because
    callers use them.)
    """
    try:
        plt = re.findall(r'\"view_price\"\:\"[\d.]*\"', html)
        tlt = re.findall(r'\"raw_title\"\:\".*?\"', html)
        for price_raw, title_raw in zip(plt, tlt):
            # Split only on the first ':' so titles containing ':' survive
            # (the original split(':')[1] truncated such titles).
            price = price_raw.split(':', 1)[1].strip('"')
            title = title_raw.split(':', 1)[1].strip('"')
            list.append([price, title])
    except Exception:
        # Best-effort parsing: report and keep whatever was collected so far.
        print("出丑")
def printGoodsList(list):
    """Print the collected [price, title] pairs as a tab-separated table.

    Columns: 1-based index, price, title.  Output is identical to the
    original manual-counter version; ``enumerate`` is the idiomatic form.
    """
    tplt = "{:4}\t{:8}\t{:16}"
    print(tplt.format("序号", "价格", "商品"))
    for count, g in enumerate(list, 1):
        print(tplt.format(count, g[0], g[1]))
def main():
    """Crawl `depth` pages of Taobao search results for `goods` and print them."""
    goods = '羽绒服'
    depth = 3  # number of result pages to crawl
    start_url = 'https://s.taobao.com/search?q=' + goods + '&sort=sale-desc'
    infoList = []
    for i in range(depth):
        try:
            # Taobao paginates 44 items per page via the `s` offset parameter.
            url = start_url + '&s=' + str(44 * i)
            html = getHTMLText(url)
            paserPage(infoList, html)
        except Exception:
            # A failed page should not abort the remaining pages.
            continue
    printGoodsList(infoList)


# Guard the entry point so importing this module no longer triggers a crawl.
if __name__ == '__main__':
    main()